ifeq ($(CONFIG_VTI),y)
OBJS += vmx_init.o vmx_virt.o vmx_vcpu.o vmx_process.o vmx_vsa.o vmx_ivt.o \
vmx_phy_mode.o vmx_utility.o vmx_interrupt.o vmx_entry.o vmmu.o \
- vtlb.o mmio.o vlsapic.o
+ vtlb.o mmio.o vlsapic.o vmx_hypercall.o mm.o
endif
# perfmon.o
# unwind.o needed for kernel unwinding (rare)
memset(ti, 0, sizeof(struct thread_info));
init_switch_stack(v);
- /* If domain is VMX domain, shared info area is created
- * by domain and then domain notifies HV by specific hypercall.
- * If domain is xenolinux, shared info area is created by
- * HV.
- * Since we have no idea about whether domain is VMX now,
- * (dom0 when parse and domN when build), postpone possible
- * allocation.
- */
+ /* Shared info area is required to be allocated at domain
+ * creation, since control panel will write some I/O info
+ * between front end and back end to that area. However for
+ * vmx domain, our design is to let the domain itself allocate the
+ * shared info area, to keep machine page contiguous. So this
+ * page will be released later when domainN issues request
+ * after up.
+ */
+ d->shared_info = (void *)alloc_xenheap_page();
/* FIXME: Because full virtual cpu info is placed in this area,
* it's unlikely to put it into one shareinfo page. Later
* need split vcpu context from vcpu_info and conforms to
* normal xen convention.
*/
- d->shared_info = NULL;
v->vcpu_info = (void *)alloc_xenheap_page();
if (!v->vcpu_info) {
printk("ERROR/HALTING: CAN'T ALLOC PAGE\n");
data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
data.itir=itir;
data.vadr=PAGEALIGN(ifa,data.ps);
- data.section=THASH_TLB_TC;
+ data.tc = 1;
data.cl=ISIDE_TLB;
vmx_vcpu_get_rr(vcpu, ifa, &vrr);
data.rid = vrr.rid;
- sections.v = THASH_SECTION_TR;
+ sections.tr = 1;
+ sections.tc = 0;
ovl = thash_find_overlap(hcb, &data, sections);
while (ovl) {
panic("Tlb conflict!!");
return;
}
- sections.v = THASH_SECTION_TC;
- thash_purge_entries(hcb, &data, sections);
- thash_insert(hcb, &data, ifa);
+ thash_purge_and_insert(hcb, &data);
return IA64_NO_FAULT;
}
data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
data.itir=itir;
data.vadr=PAGEALIGN(ifa,data.ps);
- data.section=THASH_TLB_TC;
+ data.tc = 1;
data.cl=DSIDE_TLB;
vmx_vcpu_get_rr(vcpu, ifa, &vrr);
data.rid = vrr.rid;
- sections.v = THASH_SECTION_TR;
+ sections.tr = 1;
+ sections.tc = 0;
ovl = thash_find_overlap(hcb, &data, sections);
if (ovl) {
panic("Tlb conflict!!");
return;
}
- sections.v = THASH_SECTION_TC;
- thash_purge_entries(hcb, &data, sections);
- thash_insert(hcb, &data, ifa);
+ thash_purge_and_insert(hcb, &data);
return IA64_NO_FAULT;
}
-IA64FAULT insert_foreignmap(VCPU *vcpu, UINT64 pte, UINT64 ps, UINT64 va)
+/*
+ * Lock/unlock the guest DTC covering va.  Returns thash_lock_tc's
+ * result: 0 on success, 1 on failure (no matching entry found).
+ */
+int vmx_lock_guest_dtc (VCPU *vcpu, UINT64 va, int lock)
{
- thash_data_t data, *ovl;
thash_cb_t *hcb;
- search_section_t sections;
- rr_t vrr;
+ rr_t vrr;
+ u64 preferred_size;
- hcb = vmx_vcpu_get_vtlb(vcpu);
- data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
- data.itir=0;
- data.ps = ps;
- data.vadr=PAGEALIGN(va,ps);
- data.section=THASH_TLB_FM;
- data.cl=DSIDE_TLB;
vmx_vcpu_get_rr(vcpu, va, &vrr);
- data.rid = vrr.rid;
- sections.v = THASH_SECTION_TR|THASH_SECTION_TC|THASH_SECTION_FM;
-
- ovl = thash_find_overlap(hcb, &data, sections);
- if (ovl) {
- // generate MCA.
- panic("Foreignmap Tlb conflict!!");
- return;
- }
- thash_insert(hcb, &data, va);
- return IA64_NO_FAULT;
+ hcb = vmx_vcpu_get_vtlb(vcpu);
+ va = PAGEALIGN(va,vrr.ps);
+ preferred_size = PSIZE(vrr.ps);
+ return thash_lock_tc(hcb, va, preferred_size, vrr.rid, DSIDE_TLB, lock);
}
-
IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx)
{
data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
data.itir=itir;
data.vadr=PAGEALIGN(ifa,data.ps);
- data.section=THASH_TLB_TR;
+ data.tc = 0;
data.cl=ISIDE_TLB;
vmx_vcpu_get_rr(vcpu, ifa, &vrr);
data.rid = vrr.rid;
- sections.v = THASH_SECTION_TR;
+ sections.tr = 1;
+ sections.tc = 0;
ovl = thash_find_overlap(hcb, &data, sections);
if (ovl) {
panic("Tlb conflict!!");
return;
}
- sections.v=THASH_SECTION_TC;
+ sections.tr = 0;
+ sections.tc = 1;
thash_purge_entries(hcb, &data, sections);
thash_tr_insert(hcb, &data, ifa, idx);
return IA64_NO_FAULT;
data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
data.itir=itir;
data.vadr=PAGEALIGN(ifa,data.ps);
- data.section=THASH_TLB_TR;
+ data.tc = 0;
data.cl=DSIDE_TLB;
vmx_vcpu_get_rr(vcpu, ifa, &vrr);
data.rid = vrr.rid;
- sections.v = THASH_SECTION_TR;
+ sections.tr = 1;
+ sections.tc = 0;
ovl = thash_find_overlap(hcb, &data, sections);
while (ovl) {
panic("Tlb conflict!!");
return;
}
- sections.v=THASH_SECTION_TC;
+ sections.tr = 0;
+ sections.tc = 1;
thash_purge_entries(hcb, &data, sections);
thash_tr_insert(hcb, &data, ifa, idx);
return IA64_NO_FAULT;
hcb = vmx_vcpu_get_vtlb(vcpu);
rr=vmx_vcpu_rr(vcpu,vadr);
- sections.v = THASH_SECTION_TR | THASH_SECTION_TC;
+ sections.tr = 1;
+ sections.tc = 1;
thash_purge_entries_ex(hcb,rr.rid,vadr,ps,sections,DSIDE_TLB);
return IA64_NO_FAULT;
}
search_section_t sections;
hcb = vmx_vcpu_get_vtlb(vcpu);
rr=vmx_vcpu_rr(vcpu,vadr);
- sections.v = THASH_SECTION_TR | THASH_SECTION_TC;
+ sections.tr = 1;
+ sections.tc = 1;
thash_purge_entries_ex(hcb,rr.rid,vadr,ps,sections,ISIDE_TLB);
return IA64_NO_FAULT;
}
thash_data_t data, *ovl;
hcb = vmx_vcpu_get_vtlb(vcpu);
vrr=vmx_vcpu_rr(vcpu,vadr);
- sections.v = THASH_SECTION_TC;
+ sections.tr = 0;
+ sections.tc = 1;
vadr = PAGEALIGN(vadr, ps);
thash_purge_entries_ex(hcb,vrr.rid,vadr,ps,sections,DSIDE_TLB);
mov r29=cr.ipsr;
;;
tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
-(p6)br.sptk vmx_fault_1
+(p6)br.sptk vmx_fault_2
mov r16 = cr.ifa
;;
thash r17 = r16
ENTRY(vmx_break_fault)
mov r31=pr
mov r19=11
- br.sptk.many vmx_dispatch_break_fault
+ mov r30=cr.iim
+ mov r29=0x1100
+ ;;
+ cmp4.eq p6,p7=r29,r30
+ (p6) br.dptk.few vmx_hypercall_dispatch
+ (p7) br.sptk.many vmx_dispatch_break_fault
END(vmx_break_fault)
.org vmx_ia64_ivt+0x3000
ENTRY(vmx_dispatch_break_fault)
- cmp.ne pEml,pNonEml=r0,r0 /* force pNonEml =1, don't save r4 ~ r7 */
- ;;
VMX_SAVE_MIN_WITH_COVER_R19
;;
+ ;;
alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
mov out0=cr.ifa
adds out1=16,sp
;;
mov rp=r14
br.call.sptk.many b6=vmx_ia64_handle_break
+ ;;
END(vmx_dispatch_break_fault)
+// Hypercall dispatch path: entered from vmx_break_fault when the break
+// immediate (cr.iim) equals 0x1100.  Saves guest state, re-enables
+// interruption collection and interrupts, then indirect-calls the handler
+// selected by r15 from hyper_call_table, returning via ia64_leave_hypervisor.
+ENTRY(vmx_hypercall_dispatch)
+ VMX_SAVE_MIN_WITH_COVER
+ ssm psr.ic
+ ;;
+ srlz.i // guarantee that interruption collection is on
+ ;;
+ ssm psr.i // restore psr.i
+ adds r3=16,r2 // set up second base pointer
+ ;;
+ VMX_SAVE_REST
+ ;;
+ movl r14=ia64_leave_hypervisor
+ movl r2=hyper_call_table
+ ;;
+ mov rp=r14 // handler "returns" into ia64_leave_hypervisor
+ shladd r2=r15,3,r2 // r2 = &hyper_call_table[r15] (8-byte entries)
+ ;;
+ ld8 r2=[r2]
+ ;;
+ mov b6=r2
+ ;;
+ br.call.sptk.many b6=b6
+ ;;
+END(vmx_hypercall_dispatch)
+
+
+
ENTRY(vmx_dispatch_interrupt)
cmp.ne pEml,pNonEml=r0,r0 /* force pNonEml =1, don't save r4 ~ r7 */
;;
mov rp=r14
br.call.sptk.many b6=vmx_ia64_handle_irq
END(vmx_dispatch_interrupt)
+
+
+
+ .rodata
+ .align 8
+// Hypercall handler table: one data8 entry per hypercall number, consumed
+// by vmx_hypercall_dispatch (shladd r15,3 => 8-byte stride).  Calls not
+// supported for VMX domains route to hyper_not_support; the commented
+// names record the x86 hypercall each slot corresponds to.
+ .globl hyper_call_table
+hyper_call_table:
+ data8 hyper_not_support //hyper_set_trap_table /* 0 */
+ data8 hyper_mmu_update
+ data8 hyper_not_support //hyper_set_gdt
+ data8 hyper_not_support //hyper_stack_switch
+ data8 hyper_not_support //hyper_set_callbacks
+ data8 hyper_not_support //hyper_fpu_taskswitch /* 5 */
+ data8 hyper_sched_op
+ data8 hyper_dom0_op
+ data8 hyper_not_support //hyper_set_debugreg
+ data8 hyper_not_support //hyper_get_debugreg
+ data8 hyper_not_support //hyper_update_descriptor /* 10 */
+ data8 hyper_not_support //hyper_set_fast_trap
+ data8 hyper_dom_mem_op
+ data8 hyper_not_support //hyper_multicall
+ data8 hyper_not_support //hyper_update_va_mapping
+ data8 hyper_not_support //hyper_set_timer_op /* 15 */
+ data8 hyper_event_channel_op
+ data8 hyper_xen_version
+ data8 hyper_not_support //hyper_console_io
+ data8 hyper_not_support //hyper_physdev_op
+ data8 hyper_not_support //hyper_grant_table_op /* 20 */
+ data8 hyper_not_support //hyper_vm_assist
+ data8 hyper_not_support //hyper_update_va_mapping_otherdomain
+ data8 hyper_not_support //hyper_switch_vm86
+ data8 hyper_not_support //hyper_boot_vcpu
+ data8 hyper_not_support //hyper_ni_hypercall /* 25 */
+ data8 hyper_not_support //hyper_mmuext_op
+ data8 hyper_lock_page
+ data8 hyper_set_shared_page
;; \
.mem.offset 0,0; st8.spill [r4]=r20,16; \
.mem.offset 8,0; st8.spill [r5]=r21,16; \
- mov r18=b6; \
;; \
.mem.offset 0,0; st8.spill [r4]=r22,16; \
.mem.offset 8,0; st8.spill [r5]=r23,16; \
- mov r19=b7; \
;; \
.mem.offset 0,0; st8.spill [r4]=r24,16; \
.mem.offset 8,0; st8.spill [r5]=r25,16; \
;; \
.mem.offset 0,0; st8.spill [r4]=r28,16; \
.mem.offset 8,0; st8.spill [r5]=r29,16; \
+ mov r26=b6; \
;; \
.mem.offset 0,0; st8.spill [r4]=r30,16; \
.mem.offset 8,0; st8.spill [r5]=r31,16; \
+ mov r27=b7; \
;; \
mov r30=ar.unat; \
;; \
adds r2=PT(B6)-PT(F10),r2; \
adds r3=PT(B7)-PT(F11),r3; \
;; \
- st8 [r2]=r18,16; /* b6 */ \
- st8 [r3]=r19,16; /* b7 */ \
+ st8 [r2]=r26,16; /* b6 */ \
+ st8 [r3]=r27,16; /* b7 */ \
;; \
st8 [r2]=r9; /* ar.csd */ \
st8 [r3]=r10; /* ar.ssd */ \
/* Find overlap TLB entry */
for (cch=priv->cur_cch; cch; cch = cch->next) {
- if ( ((1UL<<cch->section) & priv->s_sect.v) &&
+ if ( ( cch->tc ? priv->s_sect.tc : priv->s_sect.tr ) &&
__is_tlb_overlap(hcb, cch, priv->rid, priv->cl,
priv->_curva, priv->_eva) ) {
return cch;
void thash_tr_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va, int idx)
{
- if ( hcb->ht != THASH_TLB || entry->section != THASH_TLB_TR ) {
+ if ( hcb->ht != THASH_TLB || entry->tc ) {
panic("wrong parameter\n");
}
entry->vadr = PAGEALIGN(entry->vadr,entry->ps);
* 3: The caller need to make sure the new entry will not overlap
* with any existed entry.
*/
-static void vtlb_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
+void vtlb_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
{
thash_data_t *hash_table, *cch;
rr_t vrr;
rr_t vrr;
vrr = (hcb->get_rr_fn)(hcb->vcpu,entry->vadr);
- if ( entry->ps != vrr.ps && entry->section==THASH_TLB_TC) {
+ if ( entry->ps != vrr.ps && entry->tc ) {
panic("Not support for multiple page size now\n");
}
entry->vadr = PAGEALIGN(entry->vadr,entry->ps);
thash_internal_t *priv = &hcb->priv;
int idx;
- if ( entry->section == THASH_TLB_TR ) {
+ if ( !entry->tc ) {
return rem_tr(hcb, entry->cl, entry->tr_idx);
}
rem_thash(hcb, entry);
thash_data_t *in, search_section_t s_sect)
{
return (hcb->find_overlap)(hcb, in->vadr,
- in->ps, in->rid, in->cl, s_sect);
+ PSIZE(in->ps), in->rid, in->cl, s_sect);
}
static thash_data_t *vtlb_find_overlap(thash_cb_t *hcb,
- u64 va, u64 ps, int rid, char cl, search_section_t s_sect)
+ u64 va, u64 size, int rid, char cl, search_section_t s_sect)
{
thash_data_t *hash_table;
thash_internal_t *priv = &hcb->priv;
u64 tag;
rr_t vrr;
- priv->_curva = PAGEALIGN(va,ps);
- priv->_eva = priv->_curva + PSIZE(ps);
+ priv->_curva = va & ~(size-1);
+ priv->_eva = priv->_curva + size;
priv->rid = rid;
vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
priv->ps = vrr.ps;
}
static thash_data_t *vhpt_find_overlap(thash_cb_t *hcb,
- u64 va, u64 ps, int rid, char cl, search_section_t s_sect)
+ u64 va, u64 size, int rid, char cl, search_section_t s_sect)
{
thash_data_t *hash_table;
thash_internal_t *priv = &hcb->priv;
u64 tag;
rr_t vrr;
- priv->_curva = PAGEALIGN(va,ps);
- priv->_eva = priv->_curva + PSIZE(ps);
+ priv->_curva = va & ~(size-1);
+ priv->_eva = priv->_curva + size;
priv->rid = rid;
vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
priv->ps = vrr.ps;
{
thash_data_t *ovl;
- ovl = (hcb->find_overlap)(hcb, va, ps, rid, cl, p_sect);
+ ovl = (hcb->find_overlap)(hcb, va, PSIZE(ps), rid, cl, p_sect);
while ( ovl != NULL ) {
(hcb->rem_hash)(hcb, ovl);
ovl = (hcb->next_overlap)(hcb);
};
}
+/*
+ * Purge overlap TCs and then insert the new entry to emulate itc ops.
+ * Notes: Only TC entry can purge and insert.
+ * The entry "in" must already carry page_flags/itir/vadr/rid/cl filled
+ * in by the caller; its vadr and ppn are aligned here before insertion.
+ */
+void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in)
+{
+ thash_data_t *ovl;
+ search_section_t sections;
+
+#ifdef XEN_DEBUGGER
+ vrr = (hcb->get_rr_fn)(hcb->vcpu,in->vadr);
+ if ( in->ps != vrr.ps || hcb->ht != THASH_TLB || !in->tc ) {
+ panic ("Oops, wrong call for purge_and_insert\n");
+ return;
+ }
+#endif
+ in->vadr = PAGEALIGN(in->vadr,in->ps);
+ // Align ppn to the mapping's frame boundary (ppn presumably counted in
+ // 4K frames, hence the ps-12 shift — TODO confirm against thash_data_t).
+ in->ppn = PAGEALIGN(in->ppn, in->ps-12);
+ // Purge only overlapping TC entries; TR entries must not be disturbed.
+ sections.tr = 0;
+ sections.tc = 1;
+ ovl = (hcb->find_overlap)(hcb, in->vadr, PSIZE(in->ps),
+ in->rid, in->cl, sections);
+ if(ovl)
+ (hcb->rem_hash)(hcb, ovl);
+#ifdef XEN_DEBUGGER
+ // Debug build verifies at most one overlapping TC existed.
+ ovl = (hcb->next_overlap)(hcb);
+ if ( ovl ) {
+ panic ("Oops, 2+ overlaps for purge_and_insert\n");
+ return;
+ }
+#endif
+ (hcb->ins_hash)(hcb, in, in->vadr);
+}
/*
* Purge all TCs or VHPT entries including those in Hash table.
return NULL;
}
+/*
+ * Lock/Unlock TC if found.
+ * NOTES: Only the page in the preferred size can be handled.
+ * return:
+ * 1: failure (no overlapping entry found)
+ * 0: success
+ */
+int thash_lock_tc(thash_cb_t *hcb, u64 va, u64 size, int rid, char cl, int lock)
+{
+ thash_data_t *ovl;
+ search_section_t sections;
+
+ // Search both TR and TC sections so a TR hit can be detected below.
+ sections.tr = 1;
+ sections.tc = 1;
+ ovl = (hcb->find_overlap)(hcb, va, size, rid, cl, sections);
+ if ( ovl ) {
+ if ( !ovl->tc ) {
+// panic("Oops, TR for lock\n");
+ // TR entries carry no lock state; treated as success (panic disabled).
+ return 0;
+ }
+ else if ( lock ) {
+ if ( ovl->locked ) {
+ DPRINTK("Oops, already locked entry\n");
+ }
+ ovl->locked = 1;
+ }
+ else if ( !lock ) {
+ if ( !ovl->locked ) {
+ DPRINTK("Oops, already unlocked entry\n");
+ }
+ ovl->locked = 0;
+ }
+ return 0;
+ }
+ return 1;
+}
/*
* Notifier when TLB is deleted from hash table and its collision chain.
}
}
-
#ifdef VTLB_DEBUG
static u64 cch_length_statistics[MAX_CCH_LENGTH+1];
u64 sanity_check=0;
typedef union {
unsigned long value;
struct {
- uint64_t ve : 1;
- uint64_t rv1 : 1;
- uint64_t ps : 6;
- uint64_t rid : 24;
- uint64_t rv2 : 32;
+ unsigned long ve : 1;
+ unsigned long rv1 : 1;
+ unsigned long ps : 6;
+ unsigned long rid : 24;
+ unsigned long rv2 : 32;
};
} rr_t;
#endif // CONFIG_VTI
#include "public/xen.h"
#include "asm/tlb.h"
-#define THASH_TLB_TR 0
-#define THASH_TLB_TC 1
-#define THASH_TLB_FM 2 // foreign map
+//#define THASH_TLB_TR 0
+//#define THASH_TLB_TC 1
-#define THASH_SECTION_TR (1<<0)
-#define THASH_SECTION_TC (1<<1)
-#define THASH_SECTION_FM (1<<2)
+
+// bit definition of TR, TC search combination
+//#define THASH_SECTION_TR (1<<0)
+//#define THASH_SECTION_TC (1<<1)
/*
* Next bit definition must be same with THASH_TLB_XX
struct {
u32 tr : 1;
u32 tc : 1;
- u32 fm : 1;
- u32 rsv: 29;
+ u32 rsv: 30;
};
u32 v;
} search_section_t;
u64 ig1 : 11; //53-63
};
struct {
- u64 __rv1 : 12;
- // sizeof(domid_t) must be less than 38!!! Refer to its definition
- u64 fm_dom : 38; // 12-49 foreign map domain ID
- u64 __rv2 : 3; // 50-52
+ u64 __rv1 : 53; // 0-52
// next extension to ig1, only for TLB instance
- u64 section : 2; // 53-54 TR, TC or FM (thash_TLB_XX)
+ u64 tc : 1; // 53 TR or TC
+ u64 locked : 1; // 54 entry locked or not
CACHE_LINE_TYPE cl : 1; // I side or D side cache line
u64 nomap : 1; // entry cann't be inserted into machine TLB.
u64 __ig1 : 5; // 56-61
INVALID_ENTRY(hcb, hash) = 1; \
hash->next = NULL; }
-#define PURGABLE_ENTRY(hcb,en) \
- ((hcb)->ht == THASH_VHPT || (en)->section == THASH_TLB_TC)
+#define PURGABLE_ENTRY(hcb,en) \
+ ((hcb)->ht == THASH_VHPT || ( (en)->tc && !(en->locked)) )
/*
u64 rid, u64 va, u64 sz,
search_section_t p_sect,
CACHE_LINE_TYPE cl);
-extern thash_cb_t *init_domain_tlb(struct vcpu *d);
+extern void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in);
/*
* Purge all TCs or VHPT entries including those in Hash table.
thash_data_t *in);
extern thash_data_t *vtlb_lookup_ex(thash_cb_t *hcb,
u64 rid, u64 va,CACHE_LINE_TYPE cl);
+extern int thash_lock_tc(thash_cb_t *hcb, u64 va, u64 size, int rid, char cl, int lock);
#define ITIR_RV_MASK (((1UL<<32)-1)<<32 | 0x3)
extern void purge_machine_tc_by_domid(domid_t domid);
extern void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb);
extern rr_t vmmu_get_rr(struct vcpu *vcpu, u64 va);
+extern thash_cb_t *init_domain_tlb(struct vcpu *d);
#define VTLB_DEBUG
#ifdef VTLB_DEBUG
struct mmio_list;
typedef struct virutal_platform_def {
//unsigned long *real_mode_data; /* E820, etc. */
- //unsigned long shared_page_va;
+ unsigned long shared_page_va;
//struct vmx_virpit_t vmx_pit;
//struct vmx_handler_t vmx_handler;
//struct mi_per_cpu_info mpci; /* MMIO */